import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import time
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from skimage.feature import hog
from sklearn.model_selection import train_test_split
%matplotlib inline
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True):
# Call with two outputs if vis==True
if vis == True:
features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True,
                                  visualize=vis, feature_vector=feature_vec)
return features, hog_image
# Otherwise call with one output
else:
features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True,
                       visualize=vis, feature_vector=feature_vec)
return features
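For a 64x64 training patch with the defaults used here (9 orientations, 8 pixels per cell, 2 cells per block), each channel gives a flattened HOG vector of (64/8 - 2 + 1)^2 * 2*2 * 9 = 1764 values. A minimal sanity check on a hypothetical blank patch:
# Minimal check of the per-channel HOG length (hypothetical blank 64x64 patch)
sample_patch = np.zeros((64, 64), dtype=np.float32)
sample_hog = get_hog_features(sample_patch, orient=9, pix_per_cell=8, cell_per_block=2)
print(sample_hog.shape)  # expected: (1764,)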
def bin_spatial(img, size=(32, 32)):
color1 = cv2.resize(img[:,:,0], size).ravel()
color2 = cv2.resize(img[:,:,1], size).ravel()
color3 = cv2.resize(img[:,:,2], size).ravel()
return np.hstack((color1, color2, color3))
# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)
channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)
channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
# Return the individual histograms, bin_centers and feature vector
return hist_features
# Define a function to extract features from a list of images
def img_features(feature_image, spatial_feat, hist_feat, hog_feat, spatial_size, hist_bins, orient,
                 pix_per_cell, cell_per_block, hog_channel):
file_features = []
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
#print 'spat', spatial_features.shape
file_features.append(spatial_features)
if hist_feat == True:
# Apply color_hist()
hist_features = color_hist(feature_image, nbins=hist_bins)
#print 'hist', hist_features.shape
file_features.append(hist_features)
if hog_feat == True:
# Call get_hog_features() with vis=False, feature_vec=True
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.extend(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
                                            pix_per_cell, cell_per_block, vis=False, feature_vec=True)
#print 'hog', hog_features.shape
# Append the new feature vector to the features list
file_features.append(hog_features)
return file_features
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0,
spatial_feat=True, hist_feat=True, hog_feat=True):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file_p in imgs:
file_features = []
        image = cv2.imread(file_p)  # Read in each image one by one
# apply color conversion if other than 'RGB'
if color_space != 'RGB':
if color_space == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
        file_features = img_features(feature_image, spatial_feat, hist_feat, hog_feat, spatial_size, hist_bins, orient,
                                     pix_per_cell, cell_per_block, hog_channel)
features.append(np.concatenate(file_features))
feature_image=cv2.flip(feature_image,1) # Augment the dataset with flipped images
        file_features = img_features(feature_image, spatial_feat, hist_feat, hog_feat, spatial_size, hist_bins, orient,
                                     pix_per_cell, cell_per_block, hog_channel)
features.append(np.concatenate(file_features))
return features # Return list of feature vectors
images = glob.glob('*data/*/*/*')
cars = []
notcars = []
for image in images:
if 'non' in image:
notcars.append(image)
else:
cars.append(image)
print(len(notcars))
print(len(cars))
The dataset is pretty balanced, so a data-augmentation strategy isn't strictly necessary.
To validate the whole process, I'll make a simple plot showing the HOG features.
def show_hog_features(img):
img_cspaced = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
orient = 9
pix_per_cell=8
cell_per_block=2
_, hog_y = get_hog_features(img_cspaced[:,:,0],
orient,
pix_per_cell,
cell_per_block,
vis=True,
feature_vec=True)
_, hog_Cr = get_hog_features(img_cspaced[:,:,1],
orient,
pix_per_cell,
cell_per_block,
vis=True,
feature_vec=True)
_, hog_Cb = get_hog_features(img_cspaced[:,:,2],
orient,
pix_per_cell,
cell_per_block,
vis=True,
feature_vec=True)
fig, ax = plt.subplots(ncols=4, figsize=(20,20))
ax[0].imshow(img)
ax[1].imshow(hog_y, cmap='gray')
ax[1].set_title('hog_y')
ax[2].imshow(hog_Cr, cmap='gray')
ax[2].set_title('hog_Cr')
ax[3].imshow(hog_Cb, cmap='gray')
ax[3].set_title('hog_Cb')
show_hog_features(mpimg.imread(cars[0]))
show_hog_features(mpimg.imread(notcars[0]))
Now the "magic". I train a SVC with features extracted (throw extra_features function) of all dataset (cars and not cars). Thanks to LUV color space and all hog features, I can reach a 99% of accuracy.
I use the train_test_split function to make a 80% (train) - 20% (test) split.
# Define parameters for feature extraction
color_space = 'LUV' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 9
pix_per_cell = 8
cell_per_block = 2
hog_channel = "ALL" # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32) # Spatial binning dimensions
hist_bins = 32 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
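As a quick sanity check (a small sketch assuming the 64x64 training images), the expected feature-vector length can be computed directly from these parameters; it should match the length printed after the split below.
# Expected per-sample feature length: spatial 32*32*3 + hist 32*3 + HOG 1764*3 = 8460
hog_len_per_channel = ((64 // pix_per_cell) - cell_per_block + 1) ** 2 * cell_per_block ** 2 * orient
expected_len = spatial_size[0] * spatial_size[1] * 3 + hist_bins * 3 + hog_len_per_channel * 3
print('Expected feature vector length:', expected_len)  # 8460 with the parameters above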
car_features = extract_features(cars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
notcar_features = extract_features(notcars, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
X_scaler = StandardScaler().fit(X) # Fit a per-column scaler
scaled_X = X_scaler.transform(X) # Apply the scaler to X
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features)))) # Define the labels vector
# Split up data into randomized training and test sets
X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2, random_state=99)
print('Using:',orient,'orientations',pix_per_cell,
'pixels per cell and', cell_per_block,'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
svc = LinearSVC()
# Check the training time for the SVC
svc.fit(X_train, y_train)
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
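As an optional extra (a quick sketch using the split above), timing predictions on a handful of test samples gives a feel for the per-window latency of the classifier.
# Time predictions on a few test samples (sketch, not part of the accuracy evaluation)
n_predict = 10
t = time.time()
print('SVC predicts: ', svc.predict(X_test[0:n_predict]))
print('True labels:  ', y_test[0:n_predict])
print(round(time.time() - t, 5), 'seconds to predict', n_predict, 'samples')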
slide_window generates a list of boxes that can then be drawn on an image.
# Define a function that takes an image,
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
# If x and/or y start/stop positions not defined, set to image size
if x_start_stop[0] == None:
x_start_stop[0] = 0
if x_start_stop[1] == None:
x_start_stop[1] = img.shape[1]
if y_start_stop[0] == None:
y_start_stop[0] = 0
if y_start_stop[1] == None:
y_start_stop[1] = img.shape[0]
# Compute the span of the region to be searched
xspan = x_start_stop[1] - x_start_stop[0]
yspan = y_start_stop[1] - y_start_stop[0]
    # Compute the number of pixels per step in x/y
    nx_pix_per_step = int(xy_window[0]*(1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1]*(1 - xy_overlap[1]))
    # Compute the number of windows in x/y
    nx_buffer = int(xy_window[0]*(xy_overlap[0]))
    ny_buffer = int(xy_window[1]*(xy_overlap[1]))
    nx_windows = int((xspan-nx_buffer)/nx_pix_per_step)
    ny_windows = int((yspan-ny_buffer)/ny_pix_per_step)
# Initialize a list to append window positions to
window_list = []
# Loop through finding x and y window positions
# Note: you could vectorize this step, but in practice
# you'll be considering windows one by one with your
# classifier, so looping makes sense
for ys in range(ny_windows):
for xs in range(nx_windows):
# Calculate window position
startx = xs*nx_pix_per_step + x_start_stop[0]
endx = startx + xy_window[0]
starty = ys*ny_pix_per_step + y_start_stop[0]
endy = starty + xy_window[1]
# Append window position to list
window_list.append(((startx, starty), (endx, endy)))
# Return the list of windows
return window_list
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
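To see the raw grid before involving the classifier, the window list can be drawn directly with draw_boxes; a small sketch on one of the test images (assuming the test_images folder used later in this notebook):
# Visualize the raw sliding-window grid on a test image (no classification yet)
grid_img = cv2.imread('test_images/test1.jpg')
grid = slide_window(grid_img, x_start_stop=[None, None], y_start_stop=[400, 640],
                    xy_window=(128, 128), xy_overlap=(0.5, 0.5))
plt.figure(figsize=(10, 6))
plt.imshow(cv2.cvtColor(draw_boxes(grid_img, grid, color=(0, 0, 255), thick=4), cv2.COLOR_BGR2RGB))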
# Define a function to extract features from a single image window
# This function is very similar to extract_features()
# just for a single image rather than list of images
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0,
spatial_feat=True, hist_feat=True, hog_feat=True):
#1) Define an empty list to receive features
img_features = []
#2) Apply color conversion if other than 'RGB'
if color_space != 'RGB':
if color_space == 'HSV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(img)
#3) Compute spatial features if flag is set
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
#4) Append features to list
img_features.append(spatial_features)
#5) Compute histogram features if flag is set
if hist_feat == True:
hist_features = color_hist(feature_image, nbins=hist_bins)
#6) Append features to list
img_features.append(hist_features)
#7) Compute HOG features if flag is set
if hog_feat == True:
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.extend(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
else:
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
#8) Append features to list
img_features.append(hog_features)
#9) Return concatenated array of features
return np.concatenate(img_features)
search_windows iterates over all the windows in the image, predicting for each one whether it contains a car.
# Define a function you will pass an image
# and the list of windows to be searched (output of slide_windows())
def search_windows(img, windows, clf, scaler, color_space='RGB',
spatial_size=(32, 32), hist_bins=32,
hist_range=(0, 256), orient=9,
pix_per_cell=8, cell_per_block=2,
hog_channel=0, spatial_feat=True,
hist_feat=True, hog_feat=True):
#1) Create an empty list to receive positive detection windows
on_windows = []
#2) Iterate over all windows in the list
for window in windows:
#3) Extract the test window from original image
test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
#4) Extract features for that window using single_img_features()
features = single_img_features(test_img, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
#5) Scale extracted features to be fed to classifier
test_features = scaler.transform(np.array(features).reshape(1, -1))
#6) Predict using your classifier
prediction = clf.predict(test_features)
#7) If positive (prediction == 1) then save the window
# print(prediction, test_features)
if prediction == 1:
on_windows.append(window)
#8) Return windows for positive detections
return on_windows
Now that I can "windowize" the image, I'll gather all these functions to find cars in each test image.
def show_img(img):
if len(img.shape)==3: #Color BGR image
plt.figure()
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
else: # Grayscale image
plt.figure()
plt.imshow(img, cmap='gray')
t=time.time() # Start time
for image_p in glob.glob('test_images/test*.jpg'):
image = cv2.imread(image_p)
draw_image = np.copy(image)
windows = slide_window(image, x_start_stop=[None, None], y_start_stop=[400, 640],
xy_window=(128, 128), xy_overlap=(0.85, 0.85))
hot_windows = []
hot_windows += (search_windows(image, windows, svc, X_scaler, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat))
window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=6)
show_img(window_img)
print(round(time.time()-t, 2), 'Seconds to process test images')
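search_windows recomputes the HOG features for every single window, which is slow. find_cars below uses the HOG sub-sampling trick instead: it converts the region of interest to LUV once, computes the HOG array for each full channel a single time, and then steps across those arrays in cell-sized increments, slicing out the per-window HOG blocks and combining them with the spatial and color-histogram features before scaling and classifying.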
def convert_color(img, conv='RGB2YCrCb'):
if conv == 'RGB2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
if conv == 'BGR2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
if conv == 'RGB2LUV':
return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
draw_img = np.copy(img)
img = np.copy(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
# img = img.astype(np.float32)/255
ctrans_tosearch = img[ystart:ystop,:,:]
if scale != 1:
imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1]/scale), int(imshape[0]/scale)))
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
nfeat_per_block = orient*cell_per_block**2
# 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
window = 64
nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1
# Compute individual channel HOG features for the entire image
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
boxes = []
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
# Extract the image patch
subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))
# Get color features
spatial_features = bin_spatial(subimg, size=spatial_size)
hist_features = color_hist(subimg, nbins=hist_bins)
# Scale features and make a prediction
X = np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)
test_features = X_scaler.transform(X)
#test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
test_prediction = svc.predict(test_features)
if test_prediction == 1:
                xbox_left = int(xleft*scale)
                ytop_draw = int(ytop*scale)
                win_draw = int(window*scale)
tmp_window = ((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart))
cv2.rectangle(draw_img, tmp_window[0], tmp_window[1], (0,0,255),6)
boxes.append(tmp_window)
return draw_img, boxes
ystart = 390
ystop = 656
scale = 1.2
for image_p in glob.glob('test_images/test*.jpg'):
image = cv2.imread(image_p)
out_img, _ = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
show_img(out_img)
search_windows produces a lot of redundant true positives, so the goal is to merge these overlapping detections while also removing false positives. To achieve this, I use a heat-map strategy, adding +1 to every pixel inside each window with a positive detection (a toy example after the helper functions below illustrates the idea).
from scipy.ndimage import label
from collections import deque
def add_heat(heatmap, bbox_list):
# Iterate through list of bboxes
for box in bbox_list:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
    return heatmap
def apply_threshold(heatmap, threshold):
# Zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# Return thresholded map
return heatmap
def draw_labeled_bboxes(img, labels):
# Iterate through all detected cars
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
# Return the image
return img
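A toy sketch of the idea with synthetic boxes: two overlapping detections reinforce each other and survive the threshold, while a lone detection is wiped out.
# Toy heat-map example (synthetic boxes, not project data)
toy_heat = np.zeros((100, 100), dtype=np.float64)
toy_boxes = [((10, 10), (40, 40)), ((20, 20), (50, 50)),  # two overlapping detections
             ((70, 70), (90, 90))]                        # one isolated detection
toy_heat = add_heat(toy_heat, toy_boxes)
toy_heat = apply_threshold(toy_heat, 1)  # keep only pixels covered by more than one box
print('Cars found in toy heatmap:', label(toy_heat)[1])  # expected: 1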
history_heatmap = deque(maxlen=8)  # keep heat maps from the last 8 frames for temporal smoothing
def sum_heat():
total = np.zeros(history_heatmap[0].shape)
for heat in history_heatmap:
total += heat
# total = total/len(history_heatmap)
return total
def filter_boxes(image, box_list):
    heat = np.zeros_like(image[:,:,0]).astype(np.float64)
# Add heat to each box in box list
heat = add_heat(heat,box_list)
history_heatmap.append(heat)
# Apply threshold to help remove false positives
heat = apply_threshold(sum_heat(), 3)
# Visualize the heatmap when displaying
heatmap = np.clip(heat, 0, 255)
# Find final boxes from heatmap using label function
labels = label(heatmap)
draw_img = draw_labeled_bboxes(np.copy(image), labels)
return draw_img, heatmap
for image_p in glob.glob('test_images/test*.jpg'):
image = cv2.imread(image_p)
out_img, boxes = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
draw_img, heatmap = filter_boxes(image, boxes)
fig = plt.figure(figsize=(20,20))
plt.subplot(121)
plt.imshow(cv2.cvtColor(draw_img, cv2.COLOR_BGR2RGB))
plt.title('Car Positions')
plt.subplot(122)
plt.imshow(heatmap, cmap='hot')
plt.title('Heat Map')
fig.tight_layout()
These functions act like an API.
vehicle_detect takes an image as input and returns that image with boxes drawn around the cars. vehicle_detect_and_line_lines takes an image as input and returns that image with boxes around the cars plus the result of Project 4 (Advanced Lane Lines detection).
def vehicle_detect(image):
# import scipy.misc
# import time
# scipy.misc.imsave('/tmp/{}.jpg'.format(time.time()), image)
image = np.copy(cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
_ , boxes = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
draw_img, _ = filter_boxes(image, boxes)
return cv2.cvtColor(draw_img, cv2.COLOR_BGR2RGB)
import lane_finding
def vehicle_detect_and_line_lines(image):
image = np.copy(cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
_ , boxes = find_cars(image, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
draw_img, _ = filter_boxes(lane_finding.process_image(image), boxes)
return cv2.cvtColor(draw_img, cv2.COLOR_BGR2RGB)
image = cv2.imread('test_images/test1.jpg')
orig_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
vehicle_detect_img = vehicle_detect(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
lane_finding_img = lane_finding.process_image(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
both_img = vehicle_detect_and_line_lines(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
fig = plt.figure(figsize=(20,20))
plt.subplot(141)
plt.imshow(orig_img)
plt.title('Original')
plt.subplot(142)
plt.imshow(vehicle_detect_img)
plt.title('Car Position')
fig.tight_layout()
plt.subplot(143)
plt.imshow(lane_finding_img)
plt.title('Lane lines')
fig.tight_layout()
plt.subplot(144)
plt.imshow(both_img)
plt.title('Lane lines and Car Position')
fig.tight_layout()
Now it's time to process all the frames of a video with the whole pipeline, including the lane line detection.
from moviepy.editor import VideoFileClip
output_v = 'project_video_proc.mp4'
clip1 = VideoFileClip("project_video.mp4")
clip = clip1.fl_image(vehicle_detect_and_line_lines)
%time clip.write_videofile(output_v, audio=False)